I want a standard notebook to check that the emulators I've made are working as expected, since I've been implementing a few different assembly bias (AB) models.


In [1]:
from pearce.emulator import OriginalRecipe, ExtraCrispy
from pearce.mocks import cat_dict
import numpy as np
from os import path

In [2]:
import matplotlib
#matplotlib.use('Agg')
from matplotlib import pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()

In [3]:
#training_dir = '/u/ki/swmclau2/des/PearceLHC_wp_z_fscab_emulator/'
training_dir = '/u/ki/swmclau2/des/PearceLHC_wp_z_corrab_emulator/'

em_method = 'gp'
split_method = 'random'

In [4]:
a = 1.0
z = 1.0/a - 1.0

In [5]:
fixed_params = {'z':z}#, 'r':0.18477483}
n_leaves, n_overlap = 10, 2
emu = ExtraCrispy(training_dir, n_leaves, n_overlap, split_method, method=em_method, fixed_params=fixed_params)

In [6]:
emu = OriginalRecipe(training_dir, method = em_method, fixed_params=fixed_params)


---------------------------------------------------------------------------
KeyError                                  Traceback (most recent call last)
<ipython-input-6-774945beabd8> in <module>()
----> 1 emu = OriginalRecipe(training_dir, method = em_method, fixed_params=fixed_params)

/u/ki/swmclau2/.conda/envs/hodemulator/lib/python2.7/site-packages/pearce/emulator/emu.pyc in __init__(self, training_dir, method, hyperparams, fixed_params, independent_variable)
     73         self.independent_variable = independent_variable
     74 
---> 75         self.load_training_data(training_dir)
     76         self.build_emulator(hyperparams)
     77 

/u/ki/swmclau2/.conda/envs/hodemulator/lib/python2.7/site-packages/pearce/emulator/emu.pyc in load_training_data(self, training_dir)
    283         xs, ys,yerrs, ycovs = [], [], [], []
    284         for td in training_dir:
--> 285             x, y,yerr, ycov = self.get_data(td, {}, self.fixed_params, self.independent_variable)
    286             xs.append(x)
    287             ys.append(y)

/u/ki/swmclau2/.conda/envs/hodemulator/lib/python2.7/site-packages/pearce/emulator/emu.pyc in get_data(self, data_dir, em_params, fixed_params, independent_variable)
    197                     if pname not in fixed_params and pname not in {'z', 'r'}:  # may need to be input_params
    198                         # note we have to repeat for each scale bin
--> 199                         file_params.append(np.ones((scale_nbins,)) * params[pname])
    200 
    201                 # r and z will always be at the end.

KeyError: 'mean_occupation_centrals_assembias_corr1'
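
The traceback shows get_data asking a training file's params dict for 'mean_occupation_centrals_assembias_corr1' and not finding it: the parameter names the emulator expects don't match what the corrab training outputs store. A quick way to see the expected side of the mismatch (assuming an emulator built from a compatible training_dir, e.g. the fscab one commented out above):

In [ ]:
# Sketch: list the parameter names the emulator will ask each training
# file for; any name here that the corrab outputs don't store will raise
# the KeyError above.
print emu._ordered_params.keys()
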
In [ ]:
i = 100
plt.plot(emu.scale_bin_centers, emu.y[i*len(emu.scale_bin_centers):(i+1)*len(emu.scale_bin_centers)] + emu.y_hat)
plt.xscale('log')
print emu.x[i*len(emu.scale_bin_centers), :]
print emu.x[i*len(emu.scale_bin_centers):(i+1)*len(emu.scale_bin_centers), -1]

In [ ]:
emu._ordered_params

In [ ]:
print emu.y

In [ ]:
metric = emu._get_initial_guess(None)

In [ ]:
metric

In [ ]:
new_hp = np.array([ 80.75541473, 179.90950523,  66.97808312,  89.17850468, 225.64738711,
                     21.02835102])
new_hp_pnames = ['mean_occupation_centrals_assembias_param1',
 'mean_occupation_centrals_assembias_slope1',
 'mean_occupation_centrals_assembias_split1',
 'mean_occupation_satellites_assembias_param1',
 'mean_occupation_satellites_assembias_slope1',
 'mean_occupation_satellites_assembias_split1']
metric.update(zip(new_hp_pnames, new_hp))
emu._build_gp({'metric': metric})

In [ ]:
from pearce.mocks import compute_prim_haloprop_bins, cat_dict

In [ ]:
cosmo_params = {'simname':'chinchilla', 'Lbox':400.0, 'scale_factors':[a]}
cat = cat_dict[cosmo_params['simname']](**cosmo_params)#construct the specified catalog!

cat.load_catalog(a)
#halo_masses = cat.halocat.halo_table['halo_mvir']

In [ ]:
cat.load_model(a, 'corrRedMagic')

In [ ]:
r_bins = np.logspace(-1, 1.6, 12)
rpoints = (r_bins[1:]+r_bins[:-1])/2
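
Since the bins are log-spaced, the geometric mean of the edges is arguably a more natural bin center than the arithmetic mean used above; a one-line alternative:

In [ ]:
# Alternative bin centers for log-spaced bins: geometric mean of the edges.
rpoints_geo = np.sqrt(r_bins[1:] * r_bins[:-1])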

In [ ]:
rpoints

In [ ]:
names = ['amp']
names.extend(emu._ordered_params.keys())
emu._build_gp({'metric': {key: val for key, val in
                          zip(names, np.exp(np.array([-0.86111585,  0.6222047, -0.44600521, -2.73069034,
                                                      -1.43866719, -4.22174659, -0.20519636, -0.16024094,
                                                      -1.3396195, -2.10916469, 23.50698047, -0.67395582,
                                                      -1.23835348, -1.046121])))}})

In [ ]:
fig = plt.figure(figsize=(10,8))

i = 50

emulation_point = [('f_c', 0.233), ('logM0', 12.0), ('sigma_logM', 0.333),
                    ('alpha', 1.053),('logM1', 13.5), ('logMmin', 12.033)]

em_params = dict(emulation_point)

em_params.update(fixed_params)
del em_params['z']

ab_params = {'mean_occupation_satellites_assembias_param1':0.0,
                'mean_occupation_centrals_assembias_param1':0.0,
                'mean_occupation_satellites_assembias_slope1':5.0,
                'mean_occupation_centrals_assembias_slope1':5.0,
                 'mean_occupation_satellites_assembias_split1':0.5,
                'mean_occupation_centrals_assembias_split1':0.5}

em_params.update(ab_params)

em_params = {key: emu.x[i*len(emu.scale_bin_centers), idx] for idx, key in enumerate(emu._ordered_params.keys()[:-1]) }

key = 'mean_occupation_centrals_assembias_param1'
print em_params[key]
em_params[key] = 0.0


wp = emu.emulate_wrt_r(em_params,rpoints)[0]

print wp

plt.plot(rpoints, wp, label = 'Emulator' )
plt.plot(emu.scale_bin_centers, emu.y[i*len(emu.scale_bin_centers):(i+1)*len(emu.scale_bin_centers)]+emu.y_hat)
plt.plot(emu.scale_bin_centers, np.ones_like(emu.scale_bin_centers)*emu.y_hat)
plt.xscale('log')
plt.legend(loc='best')
#plt.xlim([0.1, 4])
#plt.title(r'$w(\theta)$ variance by %s'%param)
#plt.xlabel(r'$\theta$')
#plt.ylabel(r'$w(\theta)$')
plt.show()

In [ ]:
import scipy.optimize as op
from itertools import izip

In [ ]:
def nll(p):
    # Update the kernel parameters and compute the likelihood.
    # params are log(a) and log(m)
    #ll = 0
    #for emulator, _y in izip(self._emulators, self.y):
    #    emulator.kernel[:] = p
    #    ll += emulator.lnlikelihood(_y, quiet=True)
    emu._emulator.kernel[:] = p
    print p
    ll = emu._emulator.lnlikelihood(emu.y, quiet=False)

    # The scipy optimizer doesn't play well with infinities.
    return -ll if np.isfinite(ll) else 1e25

# And the gradient of the objective function.
def grad_nll(p):
    # Update the kernel parameters and compute the likelihood.
    #gll = 0
    #for emulator, _y in izip(self._emulators, self.y):
    #    emulator.kernel[:] = p
    #    gll += emulator.grad_lnlikelihood(_y, quiet=True)
    emu._emulator.kernel[:] = p
    gll = emu._emulator.grad_lnlikelihood(emu.y, quiet=True)
    return -gll
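
Before handing grad_nll to the optimizer, a cheap central finite difference of nll can confirm the gradient is consistent; this is a sketch, with eps an arbitrary step size:

In [ ]:
# Sanity check: compare grad_nll against a numerical gradient of nll.
eps = 1e-6
p = emu._emulator.kernel.vector.copy()
num_grad = np.array([(nll(p + eps*dp) - nll(p - eps*dp)) / (2*eps)
                     for dp in np.eye(len(p))])
emu._emulator.kernel[:] = p  # restore, since nll mutates the kernel
print num_grad
print grad_nll(p)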

In [ ]:
emu.goodness_of_fit(training_dir)

In [ ]:
p0 = emu._emulator.kernel.vector

In [ ]:
p0 = np.log(np.random.rand(emu._emulator.kernel.vector.shape[0]))
results = op.minimize(nll, p0, jac=grad_nll)
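
Since p0 is a random draw, the optimizer can land in a poor local optimum; a few restarts, keeping the best converged result, is cheap insurance (a sketch):

In [ ]:
# Optional: restart the optimization from several random initial vectors
# and keep the best converged result.
best = None
for _ in xrange(5):
    p0 = np.log(np.random.rand(emu._emulator.kernel.vector.shape[0]))
    res = op.minimize(nll, p0, jac=grad_nll)
    if res.success and (best is None or res.fun < best.fun):
        best = res
print best.x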

In [ ]:
print results.x
print results.success

In [ ]:
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import make_scorer

In [ ]:
def my_loss_func(y_true, y_pred):
    # Mean squared error between the truth (e.g. emu.y) and predictions;
    # sketched in, since the original cell was left unfinished.
    return np.mean((y_true - y_pred) ** 2)
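
make_scorer can then wrap the loss for RandomizedSearchCV. The pearce emulator is not a sklearn estimator, so this sketch uses sklearn's GaussianProcessRegressor as a stand-in, and the parameter distribution is an arbitrary example:

In [ ]:
# Sketch: wire the custom loss into a randomized hyperparameter search.
from scipy.stats import uniform
from sklearn.gaussian_process import GaussianProcessRegressor

scorer = make_scorer(my_loss_func, greater_is_better=False)
search = RandomizedSearchCV(GaussianProcessRegressor(),
                            param_distributions={'alpha': uniform(1e-10, 1e-2)},
                            n_iter=10, scoring=scorer)
# search.fit(emu.x, emu.y)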

In [ ]: